Libraries:
# libraries -------------------
library(dplyr)
library(tidyverse)
library(ggplot2)
library(GGally)
library(glmmTMB)
library(TMB)
library(emmeans)
library(DHARMa)
library(lmtest)
Set working directory
# Set the knitr root directory so all relative paths below resolve from the project folder.
# NOTE(review): machine-specific absolute path -- consider here::here() for portability.
knitr::opts_knit$set(root.dir = '/Users/user/Desktop/Data/Regen_RProj/')
Source sapling data:
# Import raw sapling data; expected to create `sapling_merge` (used below)
source("Scripts/SA_Import.R")
Pivot wider to create dataframe where each row is for one plot and has total details for each species group
# One row per plot, one column per species group (cell values = Total_Tally)
sa_merge2 <- sapling_merge %>%
pivot_wider(names_from = Species_Groups, values_from = Total_Tally)
Import time since data and add it to the PIRI dataset
# Read treatment-year data and join to the plot-level sapling table by Site
time_since <- read_csv("CleanData/Treat_Year_Data.csv")
sa_merge3 <- merge(sa_merge2, time_since, by = 'Site')
#log transform time from treatment data (l.TFT is used later as a model offset)
sa_merge3$l.TFT <- log(sa_merge3$Time_from_Treat)
Run the ‘Add_BA’ script and merge with dataset:
# Basal-area data; expected to create `prism_BA`, joined by plot number
source("Scripts/Add_BA.R")
# merge with sapling dataset -------------------
sa_merge4 <- merge(sa_merge3, prism_BA, by = 'Plot_No')
Run ‘Ground_Data.R’ script and add it to sapling dataset:
# Ground (soil/litter) data; expected to create `ground3`, joined by plot number
source("Scripts/Ground_Data.R")
# merge with sapling dataset -------------------
sa_merge5 <- merge(sa_merge4, ground3, by = 'Plot_No')
Source and add veg data:
# Vegetation data; expected to create `veg3`, joined to form the full dataset `sa.all`
source("Scripts/Veg_Data.R")
# merge with sapling dataset -------------------
sa.all <- merge(sa_merge5, veg3, by = "Plot_No")
# drop intermediate merge objects
rm(sa_merge3,
sa_merge4,
sa_merge5)
Sapling count data are taken in 25 m2 plots; basal area is measured per hectare; veg and soil data are taken at 1 m2 plots.
Sapling data will be converted to a 1 m2 basis in order to compare across measurements and reduce the number of data-collection scales to two: 1 m2 plots and per-hectare observations.
# Sapling tallies were recorded on 25 m^2 plots; divide by 25 to express
# them as per-1 m^2 densities, comparable with the veg/soil measurements.
sa.all <- sa.all %>%
  mutate(PIRI.1m = PIRI / 25,
         SO.1m = Shrub_Oak / 25,
         Other.1m = Other / 25)
Create log transformed categories for newly added variables, then select for just the desired variables:
# log(x + 1) transforms of the per-1 m^2 densities (+1 handles zero counts)
sa.all$l.PIRI1 <- log(sa.all$PIRI.1m + 1)
sa.all$l.SO1 <- log(sa.all$SO.1m + 1)
sa.all$l.other1 <- log(sa.all$Other.1m + 1)
# Keep only the analysis variables, ordered by treatment type.
# NOTE(review): this drops the raw PIRI.1m/SO.1m/Other.1m density columns,
# so later code needing them must use sa.all rather than sa.all2.
sa.all2 <- sa.all %>%
select(Treat_Type, Region, Site, Plot_No, PIRI, l.PIRI1, Shrub_Oak, l.SO1, Other, l.other1, Time_from_Treat, l.TFT, BA_HA, l.BA_HA, PIRI.BA_HA, l.BA_piri, Mineral_Soil, l.Mineral, Litter_Duff, avgLD, avgLD_l, Veg_Total, l.Veg_Total) %>%
arrange(Treat_Type)
Create a dataframe keeping sapling at 25m2 observation levels & log transforming them
# Parallel dataset keeping sapling counts at the 25 m^2 observation level,
# with log(x + 1) transforms of the raw tallies
sa.all3 <- sa.all
sa.all3$l.PIRI <- log(sa.all3$PIRI + 1)
sa.all3$l.SO <- log(sa.all3$Shrub_Oak + 1)
sa.all3$l.other <- log(sa.all3$Other + 1)
sa.all3 <- sa.all3 %>%
select(Treat_Type, Region, Site, Plot_No, PIRI, l.PIRI, Shrub_Oak, l.SO, Other, l.other, Time_from_Treat, l.TFT, BA_HA, l.BA_HA, PIRI.BA_HA, l.BA_piri, Mineral_Soil, l.Mineral, Litter_Duff, avgLD, avgLD_l, Veg_Total, l.Veg_Total) %>%
arrange(Treat_Type)
Select just for numerical vs log and then look at paired plots for sapling transformed to 1m2 observation level:
#not log transformed (1m2)
# FIX 1: `S0.1m` (digit zero) was a typo for the column `SO.1m` (letter O)
#        created above -- select() would fail with "column not found".
# FIX 2: select from sa.all, not sa.all2: sa.all2's select() dropped the
#        per-1 m^2 density columns (PIRI.1m/SO.1m/Other.1m) entirely.
sa.num <- sa.all %>%
select(PIRI.1m, SO.1m, Other.1m, Time_from_Treat, BA_HA, PIRI.BA_HA, Mineral_Soil, avgLD, Veg_Total, Treat_Type)
ggpairs(sa.num)
ggpairs(sa.num, aes(color = Treat_Type))
# Log-transformed variables at the 1 m^2 scale: pairwise plots,
# overall and colored by treatment type, then clean up.
sa.numl <- select(sa.all2,
                  l.PIRI1, l.SO1, l.other1, l.TFT, l.BA_HA,
                  l.BA_piri, l.Mineral, avgLD_l, l.Veg_Total, Treat_Type)
ggpairs(sa.numl)
ggpairs(sa.numl, mapping = aes(color = Treat_Type))
rm(sa.num, sa.numl)
Can see the correlation coefficients for linear (Pearsons) relationships. None of them appear very strong, except for ones that are analogs (avg LD vs mineral soil exposure; ba/ha vs piri ba/ha)
No significant correlations obvious from plots
Looking at pairwise again but with sa.all3 dataset (sapling counts at 25m2 observation level)
#not log transformed (25m2)
# Pairwise plots of the untransformed variables at the 25 m^2 sapling scale
sa3.num <- sa.all3 %>%
select(PIRI, Shrub_Oak, Other, Time_from_Treat, BA_HA, PIRI.BA_HA, Mineral_Soil, avgLD, Veg_Total, Treat_Type)
ggpairs(sa3.num)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Same pairs plot, colored by treatment type
ggpairs(sa3.num, aes(color = Treat_Type))
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
#log transformed (25m2)
# Pairwise plots of the log-transformed variables at the 25 m^2 scale
sa3.numl <- sa.all3 %>%
select(l.PIRI, l.SO, l.other, l.TFT, l.BA_HA, l.BA_piri, l.Mineral, avgLD_l, l.Veg_Total, Treat_Type)
ggpairs(sa3.numl)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Same pairs plot, colored by treatment type
ggpairs(sa3.numl, aes(color = Treat_Type))
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## Warning in cor(x, y): the standard deviation is zero
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# drop temporary pairs-plot data frames
rm(sa3.num,
sa3.numl)
Again no strong correlations
Going to try some scatterplots:
# PIRI vs shrub oak (both log scale), one panel per treatment type
ggplot(sa.all3, aes(x = l.PIRI, y = l.SO)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_grid(~ Treat_Type)
## `geom_smooth()` using formula = 'y ~ x'
# negative for control, positive for fallrx, flat for other
# PIRI vs other saplings (both log scale), faceted by treatment type
ggplot(sa.all3, aes(x = l.PIRI, y = l.other, fill = Treat_Type)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_grid(~ Treat_Type)
## `geom_smooth()` using formula = 'y ~ x'
# negative for control & fall rx, weak negative for springrx and mow, positive for harvest
# PIRI vs total vegetation cover (both log scale), faceted by treatment type
ggplot(sa.all3, aes(x = l.PIRI, y = l.Veg_Total, fill = Treat_Type)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_grid(~ Treat_Type)
## `geom_smooth()` using formula = 'y ~ x'
# positive for harvest, weak negative for springrx, negative for mowrx, fallrx, and control
#relationships varying by treatment type may be a reason why the model are having a hard time in DHARMa
Start with data transformed to 1m plot and no treatment type
# Poisson GLMM for PIRI sapling counts (1 m^2 scale): all log-transformed
# covariates, log(time-from-treatment) as an offset, and plots nested in
# sites as random intercepts.
sa.m1 <- glmmTMB(PIRI ~ l.SO1 + l.other1 + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all2,
family = poisson)
AIC(sa.m1) #213.7
sa.m1_sr <- simulateResiduals(sa.m1, n = 1000, plot = T)
testZeroInflation(sa.m1_sr) #zero inflated
# Same model with a constant zero-inflation term (ziformula = ~1)
sa.m1a <- glmmTMB(PIRI ~ l.SO1 + l.other1 + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all2,
ziformula = ~1,
family = poisson)
AIC(sa.m1a) #215.7
sa.m1a_sr <- simulateResiduals(sa.m1a, n = 1000, plot = T) #passes
testZeroInflation(sa.m1a_sr) #still zero inflated, lets test on data set without 1m transformations - sa.all3
# Untransformed sapling counts (25 m^2 observation scale, dataset sa.all3)
sa.m1b <- glmmTMB(PIRI ~ l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m1b) #215.9
sa.m1b_sr <- simulateResiduals(sa.m1b, n = 1000, plot = T) #fails ks
testZeroInflation(sa.m1b_sr) #still very zero inflated
Test models with untransformed counts and see if zero inflation issues resolve as variables decrease?
# Backward elimination on the 25 m^2 model: drop one covariate at a time,
# comparing AIC and likelihood-ratio tests against the fuller model.
# test piri ba
sa.m2 <- glmmTMB(PIRI ~ l.SO + l.other + l.BA_HA + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m2) #217.1
lrtest(sa.m1b, sa.m2) # p = 0.07
# test ba
sa.m3 <- glmmTMB(PIRI ~ l.SO + l.other + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m3) #215.1
#test mineral soil
sa.m4 <- glmmTMB(PIRI ~ l.SO + l.other + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m4) #213.2
lrtest(sa.m3, sa.m4) # p = 0.7
#test litter depth
sa.m5 <- glmmTMB(PIRI ~ l.SO + l.other + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m5) #211.2
#test veg cover
sa.m6 <- glmmTMB(PIRI ~ l.SO + l.other + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m6) #209.4
lrtest(sa.m5, sa.m6) #p = 0.7
#test other
sa.m7 <- glmmTMB(PIRI ~ l.SO + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m7) #253.7
lrtest(sa.m6, sa.m7) #p less than 0.001
#test shrub oak
sa.m8 <- glmmTMB(PIRI ~ l.other + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m8) #249.3
lrtest(sa.m6, sa.m8) #p less than 0.001
#sa.m6 looks best, lets test model diagnostics with DHARMa
sa.m6_sr <- simulateResiduals(sa.m6, n = 1000, plot = TRUE) #doesn't pass
testZeroInflation(sa.m6_sr) #still fails
# Add treatment type as a fixed effect and re-check diagnostics
#try model with treat type and see if it passes?
sa.m9 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m9) #256.3
lrtest(sa.m6, sa.m9) #p less than 0.001
sa.m9_sr <- simulateResiduals(sa.m9, n = 1000, plot = TRUE) #passes
testZeroInflation(sa.m9_sr) #passes
testResiduals(sa.m9_sr)
#this model looks great. based on modeling below that starts with TT, I'm going to add veg back into this model and run again
sa.m10 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m10) #218.5
lrtest(sa.m10, sa.m9) #weird, is now important
sa.m10_sr <- simulateResiduals(sa.m10, n = 1000, plot = TRUE) #now fails
testResiduals(sa.m10_sr)
Model has much better fit and zero inflation issues are resolved when treatment type variable is included. Time to do elimination again but with TT
# Backward elimination repeated, this time keeping Treat_Type in every model.
# NOTE(review): this reuses the sa.m1b...sa.m8 names from the earlier
# elimination, overwriting those fits.
sa.m1b <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m1b) #223.4
# test piri ba
sa.m2 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m2) #223.9
lrtest(sa.m1b, sa.m2) # p = 0.1
# test ba
sa.m3 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m3) #222.3
#test mineral soil
sa.m4 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m4) #220.5
lrtest(sa.m3, sa.m4) # p = 0.7
#test litter depth
sa.m5 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m5) #218.5
#test veg cover
sa.m6 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m6) #256.4
lrtest(sa.m5, sa.m6) #p less than 0.001
#test other
sa.m7 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m7) #217.6
lrtest(sa.m5, sa.m7) #p = 0.3
#test shrub oak
sa.m8 <- glmmTMB(PIRI ~ Treat_Type + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.m8) #215.6
lrtest(sa.m7, sa.m8) #p = .8
#so with treatment type included, veg cover is important, but other species of saplings are not
#check model
sa.m8_sr <- simulateResiduals(sa.m8, n = 1000, plot = TRUE) #fails
testZeroInflation(sa.m8_sr) #fails
#try model with so and other saplings?
sa.m5_sr <- simulateResiduals(sa.m5, n = 1000, plot = TRUE)
testZeroInflation(sa.m5_sr) #both still fail ...
Some issues with models failing DHARMa tests despite what model elimination/AIC says is important.
# Model zero inflation as a function of Region instead of a constant
sa.m10 <- glmmTMB(PIRI ~ Treat_Type + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~Region,
family = poisson)
AIC(sa.m10) #221.7
## [1] 221.6967
sa.m10_sr <- simulateResiduals(sa.m10, n = 1000, plot = TRUE)
testZeroInflation(sa.m10_sr) #fails
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 1.0632, p-value < 0.00000000000000022
## alternative hypothesis: two.sided
# Model zero inflation as a function of treatment type
sa.m11 <- glmmTMB(PIRI ~ Treat_Type + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~Treat_Type,
family = poisson)
AIC(sa.m11) #223.6
## [1] 223.6341
sa.m11_sr <- simulateResiduals(sa.m11, n = 1000, plot=T)
testZeroInflation(sa.m11_sr) #fails
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 1.0874, p-value < 0.00000000000000022
## alternative hypothesis: two.sided
# Treatment type only in the conditional model; Region in the zero-inflation part
sa.m12 <- glmmTMB(PIRI ~ Treat_Type + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~Region,
family = poisson)
AIC(sa.m12) #213.9 w/1, 250.5 with region
## [1] 250.5171
sa.m12_sr <- simulateResiduals(sa.m12, n = 1000, plot = TRUE) #fails with 1 as ZI; passes with region as zi
testZeroInflation(sa.m12_sr) #fails with 1 as ZI; passes with region as zi
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 0.99973, p-value = 1
## alternative hypothesis: two.sided
Ok, this is the best fit model so far
# Refit the best model so far (duplicate of sa.m12 above), then try adding
# covariates back one at a time, checking AIC and DHARMa fit each time
sa.m12 <- glmmTMB(PIRI ~ Treat_Type + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~Region,
family = poisson)
AIC(sa.m12) #213.9 w/1, 250.5 with region
## [1] 250.5171
#I'd like to try adding so, other, and veg back into this model, checking AIC and model fit
sa.m13 <- glmmTMB(PIRI ~ Treat_Type + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~Region,
family = poisson)
AIC(sa.m13) #221.7
## [1] 221.6967
sa.m13_sr <- simulateResiduals(sa.m13, n = 1000, plot = TRUE) #fails
testZeroInflation(sa.m13_sr) #fails
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 1.0632, p-value < 0.00000000000000022
## alternative hypothesis: two.sided
# Add shrub oak back in and test against sa.m12
sa.m14 <- glmmTMB(PIRI ~ Treat_Type + l.SO + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~Region,
family = poisson)
AIC(sa.m14)
## [1] 250.9238
lrtest(sa.m12, sa.m14) #p = 0.2
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.SO + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 12 -113.26
## 2 13 -112.46 1 1.5933 0.2069
# Add other saplings back in (constant zero inflation; ~Region would not converge)
sa.m15 <- glmmTMB(PIRI ~ Treat_Type + l.other + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
#won't converge w/ region
AIC(sa.m15)
## [1] 255.3798
sa.m15_sr <- simulateResiduals(sa.m15, n = 1000, plot = TRUE)
testZeroInflation(sa.m15_sr)
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 0.99931, p-value = 0.988
## alternative hypothesis: two.sided
# Candidate final models ------------------------------------------------
sa.f1 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.f1) #256.4
## [1] 256.386
# sa.f2: drops l.SO from sa.f1
sa.f2 <- glmmTMB(PIRI ~ Treat_Type + l.other + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
#won't converge w/ region
AIC(sa.f2) #255.4
## [1] 255.3798
sa.f2_sr <- simulateResiduals(sa.f2, n = 1000, plot = TRUE) #passes
testZeroInflation(sa.f2_sr) #passes
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 0.99931, p-value = 0.988
## alternative hypothesis: two.sided
# Full DHARMa residual battery (uniformity, dispersion, outliers)
testResiduals(sa.f2_sr) #passes
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.054361, p-value = 0.1054
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0628, p-value = 0.588
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 1, observations = 498, p-value = 0.6
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.004016064
## sample estimates:
## outlier frequency (expected: 0.00070281124497992 )
## 0.002008032
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.054361, p-value = 0.1054
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0628, p-value = 0.588
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 1, observations = 498, p-value = 0.6
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.004016064
## sample estimates:
## outlier frequency (expected: 0.00070281124497992 )
## 0.002008032
# Compare the two candidates: dropping l.SO is supported
lrtest(sa.f1, sa.f2) # p = 0.3 - can drop SO
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + l.other + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.other + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 10 -118.19
## 2 9 -118.69 -1 0.9938 0.3188
# Treatment type only with constant zero inflation
sa.f3 <- glmmTMB(PIRI ~ Treat_Type + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~1,
family = poisson)
AIC(sa.f3) #213.9
## [1] 213.9437
sa.f3_sr <- simulateResiduals(sa.f3, n = 1000, plot = TRUE) #fails
testZeroInflation(sa.f3_sr) #fails
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 1.09, p-value < 0.00000000000000022
## alternative hypothesis: two.sided
# LR test of sa.f2 vs the smaller sa.f3
lrtest(sa.f2, sa.f3) #p is less than 0.001, but the AIC falls so much, weird
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.other + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 9 -118.690
## 2 8 -98.972 -1 39.436 0.000000000339 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Treatment type only, Region in the zero-inflation component
sa.f4 <- glmmTMB(PIRI ~ Treat_Type + offset(l.TFT) + (1|Site/Plot_No),
data = sa.all3,
ziformula = ~Region,
family = poisson)
AIC(sa.f4) #250.5 with region
## [1] 250.5171
sa.f4_sr <- simulateResiduals(sa.f4, n = 1000, plot = TRUE) #passes
testZeroInflation(sa.f4_sr) #passes
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 0.99973, p-value = 1
## alternative hypothesis: two.sided
# Full DHARMa residual battery for sa.f4
testResiduals(sa.f4_sr) #passes
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.032779, p-value = 0.6584
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.88128, p-value = 0.89
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.005070281
## sample estimates:
## outlier frequency (expected: 0.00104417670682731 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.032779, p-value = 0.6584
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.88128, p-value = 0.89
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.005070281
## sample estimates:
## outlier frequency (expected: 0.00104417670682731 )
## 0
# Side-by-side AIC of the two passing candidates
AIC(sa.f4, sa.f2)
## df AIC
## sa.f4 12 250.5171
## sa.f2 9 255.3798
#aic is lower for sa.f4 (even with more df), so it looks like the better model; maybe graph both to see what is going on?
# Pairwise treatment contrasts on the response (count) scale
emmeans(sa.f4, specs = pairwise ~ Treat_Type, adjust = 'Tukey', type = 'response')
## $emmeans
## Treat_Type rate SE df asymp.LCL asymp.UCL
## Control 0.279 0.121 Inf 0.1189 0.653
## FallRx 0.186 0.130 Inf 0.0475 0.731
## Harvest 0.395 0.346 Inf 0.0708 2.201
## MowRx 0.431 0.317 Inf 0.1018 1.826
## SpringRx 0.111 0.128 Inf 0.0116 1.064
##
## Confidence level used: 0.95
## Intervals are back-transformed from the log scale
##
## $contrasts
## contrast ratio SE df null z.ratio p.value
## Control / FallRx 1.495 1.072 Inf 1 0.561 0.9806
## Control / Harvest 0.706 0.626 Inf 1 -0.393 0.9950
## Control / MowRx 0.646 0.477 Inf 1 -0.591 0.9764
## Control / SpringRx 2.509 2.934 Inf 1 0.787 0.9346
## FallRx / Harvest 0.472 0.429 Inf 1 -0.826 0.9226
## FallRx / MowRx 0.432 0.328 Inf 1 -1.104 0.8047
## FallRx / SpringRx 1.678 2.024 Inf 1 0.429 0.9929
## Harvest / MowRx 0.915 0.806 Inf 1 -0.100 1.0000
## Harvest / SpringRx 3.553 4.533 Inf 1 0.994 0.8584
## MowRx / SpringRx 3.881 4.577 Inf 1 1.150 0.7798
##
## P value adjustment: tukey method for comparing a family of 5 estimates
## Tests are performed on the log scale
Trying to plot with jtools package
library(jtools)
# Predicted effect of treatment type from the final model sa.f2
effect_plot(sa.f2, pred=Treat_Type)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in checkTerms(data.tmb1$terms, data.tmb0$terms): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Treatment-type effect with raw data points overlaid
effect_plot(sa.f2, pred=Treat_Type, plot.points = T)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in check_dots(..., .action = "warning"): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Treatment-type effect with partial residuals overlaid
effect_plot(sa.f2, pred=Treat_Type, partial.residuals = T)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in check_dots(..., .action = "warning"): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in checkTerms(data.tmb1$terms, data.tmb0$terms): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Predicted effect of log(other saplings)
effect_plot(sa.f2, pred=l.other)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in check_dots(..., .action = "warning"): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Same effect with raw data points overlaid
effect_plot(sa.f2, pred=l.other, plot.points = T)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in check_dots(..., .action = "warning"): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Same effect with partial residuals overlaid
effect_plot(sa.f2, pred=l.other, partial.residuals = T)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in check_dots(..., .action = "warning"): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in checkTerms(data.tmb1$terms, data.tmb0$terms): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Predicted effect of the log(time-from-treatment) offset variable
effect_plot(sa.f2, pred=l.TFT)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in check_dots(..., .action = "warning"): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Same effect with raw data points overlaid
effect_plot(sa.f2, pred=l.TFT, plot.points = T)
## Using data sa.all3 from global environment. This could cause incorrect
## results if sa.all3 has been altered since the model was fit. You can
## manually provide the data to the "data =" argument.
## Outcome is based on a total of 1 exposures
## Warning in check_dots(..., .action = "warning"): unknown arguments: interval
## Warning in check_dots(..., .action = "warning"): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
#not sure that this is better than sjplot, but good to have options
Reading about graphing offsets:
Plot these babies
library(sjPlot)
## Registered S3 methods overwritten by 'broom':
## method from
## tidy.glht jtools
## tidy.summary.glht jtools
library(sjlabelled)
##
## Attaching package: 'sjlabelled'
## The following object is masked from 'package:forcats':
##
## as_factor
## The following object is masked from 'package:ggplot2':
##
## as_label
## The following object is masked from 'package:dplyr':
##
## as_label
library(sjmisc)
##
## Attaching package: 'sjmisc'
## The following objects are masked from 'package:jtools':
##
## %nin%, center
## The following object is masked from 'package:purrr':
##
## is_empty
## The following object is masked from 'package:tidyr':
##
## replace_na
## The following object is masked from 'package:tibble':
##
## add_case
# Global sjPlot theme: classic base, serif font, enlarged axis/title/legend text
set_theme(base = theme_classic(),
theme.font = 'serif',
axis.title.size = 1.5,
axis.textsize.x = 1.5,
axis.textsize.y = 1.5,
title.size = 2.5,
title.align = "center",
legend.pos = "right",
legend.size = 1.5,
legend.title.size = 1.5,
#legend.bordercol = "black",
legend.item.size = .75)
# Predicted PIRI counts by treatment type from the final model sa.f4
plot_model(sa.f4,
type = "pred",
terms = "Treat_Type")
## Warning in checkTerms(data.tmb1$terms, data.tmb0$terms): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Predicted counts vs log(time-from-treatment), one line per treatment type
plot_model(sa.f4,
type = "pred",
terms = c("l.TFT", "Treat_Type"),
axis.title = c("Time from Last Treatment (log transformed)", "Total Count of Pitch Pine"),
title = "Predicted Counts of Pitch Pine Saplings \n >/= 2.5 cm & < 10 cm DBH",
legend.title = "Treatment Type",
line.size = 1,
show.zeroinf = T,
ci.lvl = NA,
colors = c("#D8B70A", "#02401B", "#A2A475", "#81A88D", "#972D15"))
## Warning in checkTerms(data.tmb1$terms, data.tmb0$terms): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Same prediction plot for the second candidate model (sa.f2):
# predicted pitch pine counts vs. log time from treatment, by treatment type.
# show.zeroinf = TRUE includes the zero-inflation panel; CIs suppressed.
plot_model(sa.f2,
type = "pred",
terms = c("l.TFT", "Treat_Type"),
axis.title = c("Time from Last Treatment (log transformed)", "Total Count of Pitch Pine"),
title = "Predicted Counts of Pitch Pine Saplings \n >/= 2.5 cm & < 10 cm DBH",
legend.title = "Treatment Type",
line.size = 1,
show.zeroinf = TRUE, # was T; use TRUE (T can be reassigned, TRUE cannot)
ci.lvl = NA,
colors = c("#D8B70A", "#02401B", "#A2A475", "#81A88D", "#972D15"))
## Warning in checkTerms(data.tmb1$terms, data.tmb0$terms): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Predicted pitch pine counts against the (log) count of other saplings,
# by treatment type, from model sa.f2. show.zeroinf = TRUE includes the
# zero-inflation panel; CIs suppressed for readability.
plot_model(sa.f2,
type = "pred",
terms = c("l.other", "Treat_Type"),
axis.title = c("Other Saplings (log transformed)", "Total Count of Pitch Pine"),
title = "Predicted Counts of Pitch Pine Saplings \n >/= 2.5 cm & < 10 cm DBH",
legend.title = "Treatment Type",
line.size = 1,
show.zeroinf = TRUE, # was T; use TRUE (T can be reassigned, TRUE cannot)
ci.lvl = NA,
colors = c("#D8B70A", "#02401B", "#A2A475", "#81A88D", "#972D15"))
## Warning in checkTerms(data.tmb1$terms, data.tmb0$terms): Predicting new random effect levels for terms: 1 | Plot_No:Site
## Disable this warning with 'allow.new.levels=TRUE'
# Full shrub oak sapling model: every candidate predictor, log time from
# treatment as an offset, and plots nested within sites as random intercepts.
# (Zero inflation of the full model is checked further below.)
so.sa1 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + l.BA_piri +
    l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
## Warning in finalizeTMB(TMBStruc, obj, fit, h, data.tmb.old): Model convergence
## problem; singular convergence (7). See vignette('troubleshooting'),
## help('diagnose')
# The full model hit singular convergence, so drop l.Mineral and refit
so.sa2 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + l.BA_piri +
    avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
AIC(so.sa2) # 363.4
## [1] 363.4065
# DHARMa scaled residuals (1000 simulations) for so.sa2, then the combined
# uniformity / dispersion / outlier check
so.sa2_sr <- simulateResiduals(so.sa2, plot = TRUE, n = 1000)
testResiduals(so.sa2_sr) # all three component tests pass
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.039205, p-value = 0.4283
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.021687, p-value = 0.78
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.009086345
## sample estimates:
## outlier frequency (expected: 0.000742971887550201 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.039205, p-value = 0.4283
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.021687, p-value = 0.78
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.009086345
## sample estimates:
## outlier frequency (expected: 0.000742971887550201 )
## 0
testZeroInflation(so.sa2_sr) #passes
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 0.98398, p-value = 0.576
## alternative hypothesis: two.sided
plot(so.sa2_sr)
# Diagnostics pass; begin backward selection on the fixed effects.
# Step 1: drop pitch pine basal area (l.BA_piri)
so.sa3 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + avgLD_l +
    l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
AIC(so.sa3) #361.4
## [1] 361.4489
# Step 2: drop total basal area (l.BA_HA)
so.sa4 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + l.PIRI + l.other + avgLD_l +
    l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
AIC(so.sa4) #360.9
## [1] 360.9495
lrtest(so.sa3, so.sa4) # p = 0.2
## Likelihood ratio test
##
## Model 1: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + avgLD_l +
## l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + avgLD_l + l.Veg_Total +
## offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 12 -168.72
## 2 11 -169.47 -1 1.5006 0.2206
# Step 3: drop average litter depth (avgLD_l)
so.sa5 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.Veg_Total +
    offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
AIC(so.sa5) #359
## [1] 358.9832
# Step 4: drop the count of other saplings (l.other)
so.sa6 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + l.PIRI + l.Veg_Total +
    offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
AIC(so.sa6) #357.3
## [1] 357.2917
lrtest(so.sa5, so.sa6) # p = 0.6
## Likelihood ratio test
##
## Model 1: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## Model 2: Shrub_Oak ~ Treat_Type + l.PIRI + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 10 -169.49
## 2 9 -169.65 -1 0.3085 0.5786
# Step 5: drop total vegetation cover (l.Veg_Total)
so.sa7 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + l.PIRI + offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
AIC(so.sa7) #355.7
## [1] 355.6606
# Step 6: drop pitch pine count (l.PIRI), leaving treatment type alone
so.sa8 <- glmmTMB(
  Shrub_Oak ~ Treat_Type + offset(l.TFT) + (1 | Site/Plot_No),
  data = sa.all3,
  family = poisson(link = "log")
)
AIC(so.sa8) #353.9
## [1] 353.919
# DHARMa scaled residuals (1000 simulations) for the minimal model so.sa8
so.sa8_sr <- simulateResiduals(so.sa8, plot = TRUE, n = 1000)
testResiduals(so.sa8_sr) # passes overall, though the quantile fit shows issues
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.053579, p-value = 0.1146
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.022894, p-value = 0.848
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.00000000 0.01109438
## sample estimates:
## outlier frequency (expected: 0.000943775100401606 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.053579, p-value = 0.1146
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.022894, p-value = 0.848
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.00000000 0.01109438
## sample estimates:
## outlier frequency (expected: 0.000943775100401606 )
## 0
testZeroInflation(so.sa8_sr) #passes
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 0.98336, p-value = 0.592
## alternative hypothesis: two.sided
# Residual check for the intermediate model that retains l.PIRI (so.sa7).
# Its quantile fit falls between the full model (better) and the minimal
# model (worse); check whether the pairwise contrasts differ between models.
so.sa7_sr <- simulateResiduals(so.sa7, plot = TRUE, n = 1000)
emmeans(so.sa8, specs = pairwise ~ Treat_Type, adjust = 'Tukey', type = 'response') # no difference
## $emmeans
## Treat_Type rate SE df asymp.LCL asymp.UCL
## Control 0.01175 0.01104789 Inf 0.001859 0
## FallRx 0.05091 0.05002659 Inf 0.007418 0
## Harvest 0.00441 0.00717976 Inf 0.000182 0
## MowRx 0.00000 0.00000017 Inf 0.000000 Inf
## SpringRx 0.00000 0.00000090 Inf 0.000000 Inf
##
## Confidence level used: 0.95
## Intervals are back-transformed from the log scale
##
## $contrasts
## contrast ratio SE df null z.ratio p.value
## Control / FallRx 0 0 Inf 1 -1.213 0.7440
## Control / Harvest 3 5 Inf 1 0.565 0.9800
## Control / MowRx 1058551708 16693563180375 Inf 1 0.001 1.0000
## Control / SpringRx 92630005 659685765162 Inf 1 0.003 1.0000
## FallRx / Harvest 12 21 Inf 1 1.373 0.6448
## FallRx / MowRx 4587698940 72348890990447 Inf 1 0.001 1.0000
## FallRx / SpringRx 401452827 2859038124361 Inf 1 0.003 1.0000
## Harvest / MowRx 397640263 6270863130598 Inf 1 0.001 1.0000
## Harvest / SpringRx 34796051 247808041533 Inf 1 0.002 1.0000
## MowRx / SpringRx 0 1514 Inf 1 0.000 1.0000
##
## P value adjustment: tukey method for comparing a family of 5 estimates
## Tests are performed on the log scale
emmeans(so.sa2, specs = pairwise ~ Treat_Type, adjust = 'Tukey', type = 'response') # no difference
## $emmeans
## Treat_Type rate SE df asymp.LCL asymp.UCL
## Control 0.01017 0.00986722 Inf 0.001518 0
## FallRx 0.05012 0.04975848 Inf 0.007161 0
## Harvest 0.00474 0.00776924 Inf 0.000191 0
## MowRx 0.00000 0.00000042 Inf 0.000000 Inf
## SpringRx 0.00000 0.00000008 Inf 0.000000 Inf
##
## Confidence level used: 0.95
## Intervals are back-transformed from the log scale
##
## $contrasts
## contrast ratio SE df null z.ratio p.value
## Control / FallRx 0 0 Inf 1 -1.300 0.6915
## Control / Harvest 2 4 Inf 1 0.436 0.9925
## Control / MowRx 176074640 1266464451287 Inf 1 0.003 1.0000
## Control / SpringRx 8744457238 627708790666443 Inf 1 0.000 1.0000
## FallRx / Harvest 11 19 Inf 1 1.318 0.6803
## FallRx / MowRx 867864800 6242352198604 Inf 1 0.003 1.0000
## FallRx / SpringRx 43101076982 3093951308062437 Inf 1 0.000 1.0000
## Harvest / MowRx 82072694 590330049164 Inf 1 0.003 1.0000
## Harvest / SpringRx 4076005278 292590411794187 Inf 1 0.000 1.0000
## MowRx / SpringRx 50 3582867 Inf 1 0.000 1.0000
##
## P value adjustment: tukey method for comparing a family of 5 estimates
## Tests are performed on the log scale
Quantiles are better in the larger model; neither model shows a pairwise treatment-type difference.